jne test_all_events
jmp restore_all
- ALIGN
-ret_from_exception:
- movb CS(%esp),%al
- testb $3,%al # return to non-supervisor?
- jne process_guest_exception_and_events
- jmp restore_all
-
- ALIGN
-
ENTRY(divide_error)
pushl $0 # no error code
pushl $ SYMBOL_NAME(do_divide_error)
movl %edx,%es
GET_CURRENT(%ebx)
call *%edi
- # NB. We reenable interrupts AFTER exception processing, as that is
- # required by the page fault handler (needs to save %cr2)
- sti
addl $8,%esp
- jmp ret_from_exception
+ movb CS(%esp),%al
+ testb $3,%al
+ je restore_all
+ jmp process_guest_exception_and_events
ENTRY(coprocessor_error)
pushl $0
pushl %edx
call SYMBOL_NAME(do_nmi)
addl $8,%esp
- RESTORE_ALL
+ jmp restore_all
ENTRY(int3)
pushl $0
panic("HYPERVISOR DEATH!!\n");
}
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+/*
+ * check_selector(_s): probe whether the saved selector value _s can be
+ * loaded into a segment register without faulting.
+ *
+ * Returns 0 if the load succeeds, non-zero if it faults. The probe is a
+ * real 'mov _s, %gs' at label 1; if that load raises an exception, the
+ * __ex_table entry (1b -> 3b) redirects execution to the .fixup stub,
+ * which increments 'err' (initialised to 0 via the "0"(0) constraint)
+ * and resumes at label 2.
+ *
+ * NB. The probe clobbers %gs, so callers must not rely on %gs afterwards
+ * (check_saved_selectors zeroes %fs/%gs up front for this reason).
+ */
+#define check_selector(_s) \
+ ({ int err; \
+ __asm__ __volatile__ ( \
+ "1: movl %2,%%gs \n" \
+ "2: \n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: incl %0 \n" \
+ " jmp 2b \n" \
+ ".previous \n" \
+ ".section __ex_table,\"a\"\n" \
+ ".align 4 \n" \
+ ".long 1b,3b \n" \
+ ".previous " \
+ : "=&r" (err) : "0" (0), \
+ "m" (*(unsigned int *)&(_s))); \
+ err; })
+
+/*
+ * check_saved_selectors(regs): sanitise the segment selectors saved in
+ * *regs. Each of DS/ES/FS/GS is probed with check_selector(); any value
+ * that would fault on reload is replaced with the (always-safe) null
+ * selector 0, so that restoring the register state later cannot fault.
+ */
+static inline void check_saved_selectors(struct pt_regs *regs)
{
- if (!(3 & regs->xcs)) die(str, regs, err);
+ /*
+ * Prevent recursion: check_selector() probes via a live %gs load, so
+ * null out %fs/%gs first; a fault during the probe is then absorbed by
+ * the fixup stub rather than re-entering this path.
+ */
+ __asm__ __volatile__ (
+ "movl %0,%%fs; movl %0,%%gs"
+ : : "r" (0) );
+
+ /*
+ * NB. We need to check DS and ES as well, since we may have taken an
+ * exception after they were restored. (NOTE(review): the original
+ * comment is truncated here — presumably it referred to the exception
+ * return path that reloads DS/ES; confirm against the full source.)
+ */
+ if ( check_selector(regs->xds) )
+ regs->xds = 0;
+ if ( check_selector(regs->xes) )
+ regs->xes = 0;
+ if ( check_selector(regs->xfs) )
+ regs->xfs = 0;
+ if ( check_selector(regs->xgs) )
+ regs->xgs = 0;
}
-static void inline do_trap(int trapnr, char *str,
- struct pt_regs * regs,
+
+static inline void do_trap(int trapnr, char *str,
+ struct pt_regs *regs,
long error_code, int use_error_code)
{
struct task_struct *p = current;
if ( (fixup = search_exception_table(regs->eip)) != 0 )
{
regs->eip = fixup;
- regs->xfs = regs->xgs = 0;
+ check_saved_selectors(regs);
return;
}
if ( (fixup = search_exception_table(regs->eip)) != 0 )
{
regs->eip = fixup;
- regs->xfs = regs->xgs = 0;
+ check_saved_selectors(regs);
return;
}
if ( (fixup = search_exception_table(regs->eip)) != 0 )
{
regs->eip = fixup;
- regs->xfs = regs->xgs = 0;
+ check_saved_selectors(regs);
return;
}
put_page_type(page);
put_page_tot(page);
}
+
+ /* Dispose of the (now possibly invalid) mappings from the TLB. */
+ flush_tlb[smp_processor_id()] = 1;
}
+
+
static inline void invalidate_shadow_ldt(void)
{
if ( current->mm.shadow_ldt_mapcnt != 0 )
(current->mm.ldt_base != ptr) )
{
if ( current->mm.ldt_ents != 0 )
- {
invalidate_shadow_ldt();
- flush_tlb[smp_processor_id()] = 1;
- }
current->mm.ldt_base = ptr;
current->mm.ldt_ents = ents;
- load_LDT();
+ load_LDT(current);
}
break;
}
extern void update_process_times(int user);
#include <asm/desc.h>
-static inline void load_LDT(void)
+static inline void load_LDT(struct task_struct *p)
{
unsigned int cpu;
struct desc_struct *desc;
unsigned long ents;
- if ( (ents = current->mm.ldt_ents) == 0 )
+ if ( (ents = p->mm.ldt_ents) == 0 )
{
__asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
}
else
{
cpu = smp_processor_id();
- desc = (struct desc_struct *)GET_GDT_ADDRESS(current) + __LDT(cpu);
+ desc = (struct desc_struct *)GET_GDT_ADDRESS(p) + __LDT(cpu);
desc->a = ((LDT_VIRT_START&0xffff)<<16) | (ents*8-1);
desc->b = (LDT_VIRT_START&(0xff<<24)) | 0x8200 |
((LDT_VIRT_START&0xff0000)>>16);